# I load the needed libraries
library(dplyr)      # data-manipulation verbs (masks stats::filter/lag, base set ops)
library(scales)     # scale/label helpers for plots
library(GoFKernel)  # kernel goodness-of-fit utilities (pulls in KernSmooth)
library(mvtnorm)    # multivariate normal density and sampling
library(gplots)     # extra plotting tools (masks stats::lowess)
options(warn=-1)    # NOTE(review): suppresses ALL warnings globally — consider removing when debugging
Attaching package: ‘dplyr’
The following objects are masked from ‘package:stats’:
filter, lag
The following objects are masked from ‘package:base’:
intersect, setdiff, setequal, union
The following objects are masked from ‘package:stats’:
filter, lag
The following objects are masked from ‘package:base’:
intersect, setdiff, setequal, union
Loading required package: KernSmooth
KernSmooth 2.23 loaded
Copyright M. P. Wand 1997-2009
Attaching package: ‘gplots’
The following object is masked from ‘package:stats’:
lowess
I load the functions from the class file:
source("class_MCMC.R")
Then I define the target function that I want the MCMCs to sample from:
# Target density to sample from: an n-dimensional (unnormalized) Gaussian
# with component-wise widths sigma_i = i and centers c_i = n - i + 1.
#
# Args:
#   theta: numeric vector of parameter values (length n).
# Returns:
#   prod_i exp(-(theta_i - c_i)^2 / sigma_i^2)  (1 for an empty theta).
posterior_g_inhom <- function(theta) {
  n <- length(theta)
  sigmas <- seq_len(n)        # sigma_i = i
  centers <- rev(seq_len(n))  # c_i = n, n-1, ..., 1
  # Vectorized product replaces the original element-wise loop;
  # also well-defined for n = 0 (the original 1:length(theta) loop was not).
  prod(exp(-(theta - centers)^2 / sigmas^2))
}
chosen_function = posterior_g_inhom
Then I only have to set the initialization parameters — the "hyperparameters" of the simulations.
# Hyperparameters of the simulations ----
init <- c(4, 4, 4)             # starting point of each chain
std <- diag(1, 3)              # initial 3x3 proposal covariance (identity)
N <- as.integer(1e5)           # samples to keep after burn-in
burn_in <- as.integer(1e4)     # initial samples to discard
print_step <- as.integer(1e2)  # step used by show_results
# print_init = as.integer(1e3)
N_tot <- N + burn_in           # total chain length
# For Haario:
epsilon <- 0.001               # passed as `eps` to the Haario samplers
# MVTNORM ----
# Run the fixed-proposal sampler (defined in class_MCMC.R).
mcmc_g <- random_steps_mvtnorm(func_wanted = chosen_function, theta_init = init,
                               n_samples = N_tot, sigma = std, print_accept = TRUE)
# Discard the burn-in: the chain has N_tot = N + burn_in rows, so keep
# rows (burn_in + 1)..N_tot. The original `mcmc_g[burn_in:N, ]` kept the
# last burn-in row and silently dropped the final `burn_in` valid samples.
mcmc_g <- mcmc_g[(burn_in + 1):N_tot, ]
# Plotting the results
show_results(mcmc = mcmc_g, init = init, std = std, step = print_step)
Acceptance rate = 49.43 %
# MVTNORM GIBBS ----
# Component-wise (Gibbs-style) variant of the fixed-proposal sampler.
mcmc_g <- random_steps_mvtnorm_gibbs(func_wanted = chosen_function, theta_init = init,
                                     n_samples = N_tot, sigma = std, print_accept = TRUE)
# Discard the burn-in: keep rows (burn_in + 1)..N_tot.
# (The original `burn_in:N` slice was off by one and dropped the last
# `burn_in` post-burn-in samples.)
mcmc_g <- mcmc_g[(burn_in + 1):N_tot, ]
show_results(mcmc = mcmc_g, init = init, std = std, step = print_step)
Acceptance rate = 60.15364 %
# # SIMPLE ADAPTIVE
# mcmc_g = random_steps_simple (func_wanted = chosen_function, theta_init = init, n_samples = N_tot, sigma = std, print_accept=TRUE, t_0 = burn_in,
# gamma_function = gamma_series_exp, halved_step = burn_in)
# mcmc_g = mcmc_g[burn_in:N, ]
# show_results(mcmc = mcmc_g, init = init, std = std, step = print_step)
# # SIMPLE ADAPTIVE GIBBS
# mcmc_g = random_steps_simple_gibbs (func_wanted = chosen_function, theta_init = init, n_samples = N_tot, sigma = std, print_accept=TRUE, t_0 = burn_in,
# gamma_function = gamma_series_exp, halved_step = burn_in)
# mcmc_g = mcmc_g[burn_in:N, ]
# show_results(mcmc = mcmc_g, init = init, std = std, step = print_step)
# HAARIO ----
# Adaptive sampler with covariance regularization `eps` (see class_MCMC.R).
mcmc_g <- random_steps_haario(func_wanted = chosen_function, theta_init = init,
                              n_samples = N_tot, sigma = std, print_accept = TRUE,
                              t_0 = burn_in, eps = epsilon)
# Discard the burn-in: keep rows (burn_in + 1)..N_tot.
# (The original `burn_in:N` slice was off by one and dropped the last
# `burn_in` post-burn-in samples.)
mcmc_g <- mcmc_g[(burn_in + 1):N_tot, ]
show_results(mcmc = mcmc_g, init = init, std = std, step = print_step)
Acceptance rate = 13.24455 %
Final mean = 3.008452 2.003238 1.11752
Final covariance matrix =
[,1] [,2] [,3]
[1,] 32.08552 20.648844 11.599098
[2,] 20.64884 17.207206 7.684005
[3,] 11.59910 7.684005 12.050366
# HAARIO GIBBS ----
# Component-wise variant of the Haario adaptive sampler.
mcmc_g <- random_steps_haario_gibbs(func_wanted = chosen_function, theta_init = init,
                                    n_samples = N_tot, sigma = std, print_accept = TRUE,
                                    t_0 = burn_in, eps = epsilon)
# Discard the burn-in: keep rows (burn_in + 1)..N_tot.
# (The original `burn_in:N` slice was off by one and dropped the last
# `burn_in` post-burn-in samples.)
mcmc_g <- mcmc_g[(burn_in + 1):N_tot, ]
show_results(mcmc = mcmc_g, init = init, std = std, step = print_step)
Acceptance rate = 16.50818 %
Final mean = 3.015303 2.003318 0.9645052
Final covariance matrix =
[,1] [,2] [,3]
[1,] 32.270189 20.732281 9.991341
[2,] 20.732281 16.838466 6.613276
[3,] 9.991341 6.613276 10.079051
# RAO ----
# Adaptive sampler with per-step gamma schedule (see class_MCMC.R).
mcmc_g <- random_steps_AM_rao(func_wanted = chosen_function, theta_init = init,
                              n_samples = N_tot, sigma = std, print_accept = TRUE,
                              t_0 = burn_in, gamma_function = gamma_series_exp,
                              halved_step = burn_in / 2)
# Discard the burn-in: keep rows (burn_in + 1)..N_tot.
# (The original `burn_in:N` slice was off by one and dropped the last
# `burn_in` post-burn-in samples.)
mcmc_g <- mcmc_g[(burn_in + 1):N_tot, ]
show_results(mcmc = mcmc_g, init = init, std = std, step = print_step)
Acceptance rate = 49.90091 %
Final mean = 2.98207 1.988313 0.9291202
Final covariance matrix =
[,1] [,2] [,3]
[1,] 0.50346133 0.02171594 0.04661120
[2,] 0.02171594 2.06759405 0.02720914
[3,] 0.04661120 0.02720914 4.48273484
# RAO GIBBS ----
# Component-wise variant of the Rao adaptive sampler.
mcmc_g <- random_steps_AM_rao_gibbs(func_wanted = chosen_function, theta_init = init,
                                    n_samples = N_tot, sigma = std, print_accept = TRUE,
                                    t_0 = burn_in, gamma_function = gamma_series_exp,
                                    halved_step = burn_in / 2)
# Discard the burn-in: keep rows (burn_in + 1)..N_tot.
# (The original `burn_in:N` slice was off by one and dropped the last
# `burn_in` post-burn-in samples.)
mcmc_g <- mcmc_g[(burn_in + 1):N_tot, ]
show_results(mcmc = mcmc_g, init = init, std = std, step = print_step)
Acceptance rate = 62.15818 %
Final mean = 3.011712 2.065499 0.9665873
Final covariance matrix =
[,1] [,2] [,3]
[1,] 0.56844414 0.012810382 -0.025676793
[2,] 0.01281038 1.846701151 0.003708303
[3,] -0.02567679 0.003708303 4.090484001
# GLOBAL ----
# Globally-adaptive sampler (scale `lambda` reported by show_results).
mcmc_g <- random_steps_global(func_wanted = chosen_function, theta_init = init,
                              n_samples = N_tot, sigma = std, print_accept = TRUE,
                              t_0 = burn_in, gamma_function = gamma_series_exp,
                              halved_step = burn_in)
# Discard the burn-in: keep rows (burn_in + 1)..N_tot.
# (The original `burn_in:N` slice was off by one and dropped the last
# `burn_in` post-burn-in samples.)
mcmc_g <- mcmc_g[(burn_in + 1):N_tot, ]
show_results(mcmc = mcmc_g, init = init, std = std, step = print_step)
Acceptance rate = 42.52182 %
Final mean = 2.974244 1.934685 0.8937542
Final lambda = 1.8548
Final covariance matrix =
[,1] [,2] [,3]
[1,] 0.50716189 -0.0945125 -0.03599242
[2,] -0.09451250 1.9903200 -0.23646611
[3,] -0.03599242 -0.2364661 4.34388241
# GLOBAL GIBBS ----
# Component-wise variant of the globally-adaptive sampler.
mcmc_g <- random_steps_global_gibbs(func_wanted = chosen_function, theta_init = init,
                                    n_samples = N_tot, sigma = std, print_accept = TRUE,
                                    t_0 = burn_in, gamma_function = gamma_series_exp,
                                    halved_step = burn_in)
# Discard the burn-in: keep rows (burn_in + 1)..N_tot.
# (The original `burn_in:N` slice was off by one and dropped the last
# `burn_in` post-burn-in samples.)
mcmc_g <- mcmc_g[(burn_in + 1):N_tot, ]
show_results(mcmc = mcmc_g, init = init, std = std, step = print_step)
Acceptance rate = 56.42212 %
Final mean = 2.984221 1.940497 1.045683
Final covariance matrix =
[,1] [,2] [,3]
[1,] 0.59388336 -0.0136597 -0.04840148
[2,] -0.01365970 1.8206692 -0.11124697
[3,] -0.04840148 -0.1112470 3.70285292